Field-name cleanups.
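
Rename structure fields to drop their per-structure prefixes:
domain.d_flags and exec_domain.ed_flags both become 'flags',
exec_domain.eid becomes 'id', ed_next_list and domain.next_list become
'next_in_list', domain.next_hash becomes 'next_in_hashbucket', and
ed_sched_priv becomes 'sched_priv'. The EDF_* and DF_* flag bits are
renumbered into two independent namespaces, one for each 'flags' word.
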
Signed-off-by: Keir Fraser <keir@xensource.com>
#ifndef CLONE_DOMAIN0
if ( d != dom0 )
BUG();
- if ( test_bit(DF_CONSTRUCTED, &d->d_flags) )
+ if ( test_bit(DF_CONSTRUCTED, &d->flags) )
BUG();
#endif
#endif
console_endboot(strstr(cmdline, "tty0") != NULL);
- set_bit(DF_CONSTRUCTED, &d->d_flags);
+ set_bit(DF_CONSTRUCTED, &d->flags);
new_thread(ed, pkern_entry, 0, 0);
// FIXME: Hack for keyboard input
unsigned long pkern_entry;
#ifndef DOMU_AUTO_RESTART
- if ( test_bit(DF_CONSTRUCTED, &d->d_flags) ) BUG();
+ if ( test_bit(DF_CONSTRUCTED, &d->flags) ) BUG();
#endif
printk("*** LOADING DOMAIN %d ***\n",d->id);
loaddomainelfimage(d,image_start);
printk("loaddomainelfimage returns\n");
- set_bit(DF_CONSTRUCTED, &d->d_flags);
+ set_bit(DF_CONSTRUCTED, &d->flags);
printk("calling new_thread, entry=%p\n",pkern_entry);
#ifdef DOMU_AUTO_RESTART
#define IDLE0_DOMAIN(_t) \
{ \
id: IDLE_DOMAIN_ID, \
- d_flags: 1<<DF_IDLETASK, \
+ flags: 1<<DF_IDLETASK, \
refcnt: ATOMIC_INIT(1) \
}
if (!cnt[id]--) { printk("%x",id); cnt[id] = 50; }
if (!i--) { printk("+",id); cnt[id] = 100; }
}
- clear_bit(EDF_RUNNING, &prev->ed_flags);
+ clear_bit(EDF_RUNNING, &prev->flags);
//if (!is_idle_task(next->domain) )
//send_guest_virq(next, VIRQ_TIMER);
load_region_regs(current);
printf(buf);
if (regs) show_registers(regs);
domain_pause_by_systemcontroller(current->domain);
- set_bit(DF_CRASHED, ed->domain->d_flags);
+ set_bit(DF_CRASHED, &ed->domain->flags);
if (ed->domain->id == 0) {
int i = 1000000000L;
// if domain0 crashes, just periodically print out panic
if ( dom0 == NULL )
panic("Error creating domain 0\n");
- set_bit(DF_PRIVILEGED, &dom0->d_flags);
+ set_bit(DF_PRIVILEGED, &dom0->flags);
/*
* We're going to setup domain0 using the module(s) that we stashed safely
#endif
c->flags = 0;
- if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
+ if ( test_bit(EDF_DONEFPUINIT, &ed->flags) )
c->flags |= VGCF_I387_VALID;
if ( KERNEL_MODE(ed, &ed->arch.guest_context.user_regs) )
c->flags |= VGCF_IN_KERNEL;
d->shared_info = (void *)alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
- ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
+ ed->vcpu_info = &d->shared_info->vcpu_data[ed->id];
SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
PAGE_SHIFT] = INVALID_M2P_ENTRY;
struct domain *d = ed->domain;
ed->arch.schedule_tail = d->exec_domain[0]->arch.schedule_tail;
ed->arch.perdomain_ptes =
- d->arch.mm_perdomain_pt + (ed->eid << PDPT_VCPU_SHIFT);
+ d->arch.mm_perdomain_pt + (ed->id << PDPT_VCPU_SHIFT);
ed->arch.flags = TF_kernel_mode;
}
return -EINVAL;
}
- clear_bit(EDF_DONEFPUINIT, &ed->ed_flags);
+ clear_bit(EDF_DONEFPUINIT, &ed->flags);
if ( c->flags & VGCF_I387_VALID )
- set_bit(EDF_DONEFPUINIT, &ed->ed_flags);
+ set_bit(EDF_DONEFPUINIT, &ed->flags);
ed->arch.flags &= ~TF_kernel_mode;
if ( c->flags & VGCF_IN_KERNEL )
if ( !IS_PRIV(d) )
ed->arch.guest_context.user_regs.eflags &= 0xffffcfff;
- if ( test_bit(EDF_DONEINIT, &ed->ed_flags) )
+ if ( test_bit(EDF_DONEINIT, &ed->flags) )
return 0;
if ( (rc = (int)set_fast_trap(ed, c->fast_trap_idx)) != 0 )
for ( i = 0; i < 8; i++ )
(void)set_debugreg(ed, i, c->debugreg[i]);
- if ( ed->eid == 0 )
+ if ( ed->id == 0 )
d->vm_assist = c->vm_assist;
phys_basetab = c->pt_base;
update_pagetables(ed);
/* Don't redo final setup */
- set_bit(EDF_DONEINIT, &ed->ed_flags);
+ set_bit(EDF_DONEINIT, &ed->flags);
return 0;
}
* 'prev' (after this point, a dying domain's info structure may be freed
* without warning).
*/
- clear_bit(EDF_RUNNING, &prev->ed_flags);
+ clear_bit(EDF_RUNNING, &prev->flags);
schedule_tail(next);
BUG();
/* Sanity! */
if ( d->id != 0 )
BUG();
- if ( test_bit(DF_CONSTRUCTED, &d->d_flags) )
+ if ( test_bit(DF_CONSTRUCTED, &d->flags) )
BUG();
memset(&dsi, 0, sizeof(struct domain_setup_info));
/* DOM0 gets access to everything. */
physdev_init_dom0(d);
- set_bit(DF_CONSTRUCTED, &d->d_flags);
+ set_bit(DF_CONSTRUCTED, &d->flags);
new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start);
__asm__ __volatile__ ( "fninit" );
if ( cpu_has_xmm )
load_mxcsr(0x1f80);
- set_bit(EDF_DONEFPUINIT, &current->ed_flags);
+ set_bit(EDF_DONEFPUINIT, &current->flags);
}
void save_init_fpu(struct exec_domain *tsk)
* This causes us to set the real flag, so we'll need
* to temporarily clear it while saving f-p state.
*/
- if ( test_bit(EDF_GUEST_STTS, &tsk->ed_flags) )
+ if ( test_bit(EDF_GUEST_STTS, &tsk->flags) )
clts();
if ( cpu_has_fxsr )
"fnsave %0 ; fwait"
: "=m" (tsk->arch.guest_context.fpu_ctxt) );
- clear_bit(EDF_USEDFPU, &tsk->ed_flags);
+ clear_bit(EDF_USEDFPU, &tsk->flags);
stts();
}
struct domain idle0_domain = {
id: IDLE_DOMAIN_ID,
- d_flags: 1<<DF_IDLETASK,
+ flags: 1<<DF_IDLETASK,
refcnt: ATOMIC_INIT(1)
};
* See domain.c:relinquish_list().
*/
ASSERT((x & PGT_validated) ||
- test_bit(DF_DYING, &page_get_owner(page)->d_flags));
+ test_bit(DF_DYING, &page_get_owner(page)->flags));
if ( unlikely((nx & PGT_count_mask) == 0) )
{
* it is dying.
*/
ASSERT(e->tot_pages <= e->max_pages);
- if ( unlikely(test_bit(DF_DYING, &e->d_flags)) ||
+ if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
unlikely(e->tot_pages == e->max_pages) ||
unlikely(IS_XEN_HEAP_FRAME(page)) )
{
MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
"page is in Xen heap (%lx), or dom is dying (%ld).\n",
- e->tot_pages, e->max_pages, op.mfn, e->d_flags);
+ e->tot_pages, e->max_pages, op.mfn, e->flags);
okay = 0;
goto reassign_fail;
}
* If this is a multi-processor guest then ensure that the page is hooked
* into at most one L2 table, which must be the one running on this VCPU.
*/
- if ( (d->exec_domain[0]->ed_next_list != NULL) &&
+ if ( (d->exec_domain[0]->next_in_list != NULL) &&
((page->u.inuse.type_info & PGT_count_mask) !=
(!!(page->u.inuse.type_info & PGT_pinned) +
(which == PTWR_PT_ACTIVE))) )
* Also, a domain mustn't have PGC_allocated pages when it is dying.
*/
ASSERT(e->tot_pages <= e->max_pages);
- if ( unlikely(test_bit(DF_DYING, &e->d_flags)) ||
+ if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
unlikely(e->tot_pages == e->max_pages) ||
unlikely(!gnttab_prepare_for_transfer(e, d, gntref)) )
{
MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
"provided a bad grant ref, or is dying (%p).\n",
- e->tot_pages, e->max_pages, e->d_flags);
+ e->tot_pages, e->max_pages, e->flags);
spin_unlock(&e->page_alloc_lock);
put_domain(e);
okay = 0;
BUG_ON(d->arch.iobmp_mask == NULL);
memset(d->arch.iobmp_mask, 0, IOBMP_BYTES);
- set_bit(DF_PHYSDEV, &d->d_flags);
+ set_bit(DF_PHYSDEV, &d->flags);
}
if ( dom0 == NULL )
panic("Error creating domain 0\n");
- set_bit(DF_PRIVILEGED, &dom0->d_flags);
+ set_bit(DF_PRIVILEGED, &dom0->flags);
/* Grab the DOM0 command line. Skip past the image name. */
cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
* Currently this does not fix up page ref counts, so it is valid to call
* only when a domain is being destroyed.
*/
- BUG_ON(!test_bit(DF_DYING, &d->d_flags));
+ BUG_ON(!test_bit(DF_DYING, &d->flags));
d->arch.shadow_tainted_refcnts = 1;
free_shadow_pages(d);
ed = idle->exec_domain[0];
- set_bit(DF_IDLETASK, &idle->d_flags);
+ set_bit(DF_IDLETASK, &idle->flags);
ed->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
if ( set )
{
- set_bit(EDF_GUEST_STTS, &ed->ed_flags);
+ set_bit(EDF_GUEST_STTS, &ed->flags);
stts();
}
else
{
- clear_bit(EDF_GUEST_STTS, &ed->ed_flags);
- if ( test_bit(EDF_USEDFPU, &ed->ed_flags) )
+ clear_bit(EDF_GUEST_STTS, &ed->flags);
+ if ( test_bit(EDF_USEDFPU, &ed->flags) )
clts();
}
case 0: /* Read CR0 */
*reg =
(read_cr0() & ~X86_CR0_TS) |
- (test_bit(EDF_GUEST_STTS, &ed->ed_flags) ? X86_CR0_TS : 0);
+ (test_bit(EDF_GUEST_STTS, &ed->flags) ? X86_CR0_TS : 0);
break;
case 2: /* Read CR2 */
/* Prevent recursion. */
clts();
- if ( !test_and_set_bit(EDF_USEDFPU, &current->ed_flags) )
+ if ( !test_and_set_bit(EDF_USEDFPU, &current->flags) )
{
- if ( test_bit(EDF_DONEFPUINIT, &current->ed_flags) )
+ if ( test_bit(EDF_DONEFPUINIT, &current->flags) )
restore_fpu(current);
else
init_fpu();
}
- if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) )
+ if ( test_and_clear_bit(EDF_GUEST_STTS, &current->flags) )
{
struct trap_bounce *tb = &current->arch.trap_bounce;
tb->flags = TBF_EXCEPTION;
{
ret = -EINVAL;
if ( (d != current->domain) &&
- test_bit(DF_CONSTRUCTED, &d->d_flags) )
+ test_bit(DF_CONSTRUCTED, &d->flags) )
{
domain_unpause_by_systemcontroller(d);
ret = 0;
if ( cpu == -1 )
{
- clear_bit(EDF_CPUPINNED, &ed->ed_flags);
+ clear_bit(EDF_CPUPINNED, &ed->flags);
}
else
{
exec_domain_pause(ed);
if ( ed->processor != (cpu % smp_num_cpus) )
- set_bit(EDF_MIGRATED, &ed->ed_flags);
- set_bit(EDF_CPUPINNED, &ed->ed_flags);
+ set_bit(EDF_MIGRATED, &ed->flags);
+ set_bit(EDF_CPUPINNED, &ed->flags);
ed->processor = cpu % smp_num_cpus;
exec_domain_unpause(ed);
}
ed = d->exec_domain[op->u.getdomaininfo.exec_domain];
op->u.getdomaininfo.flags =
- (test_bit( DF_DYING, &d->d_flags) ? DOMFLAGS_DYING : 0) |
- (test_bit( DF_CRASHED, &d->d_flags) ? DOMFLAGS_CRASHED : 0) |
- (test_bit( DF_SHUTDOWN, &d->d_flags) ? DOMFLAGS_SHUTDOWN : 0) |
- (test_bit(EDF_CTRLPAUSE, &ed->ed_flags) ? DOMFLAGS_PAUSED : 0) |
- (test_bit(EDF_BLOCKED, &ed->ed_flags) ? DOMFLAGS_BLOCKED : 0) |
- (test_bit(EDF_RUNNING, &ed->ed_flags) ? DOMFLAGS_RUNNING : 0);
+ (test_bit( DF_DYING, &d->flags) ? DOMFLAGS_DYING : 0) |
+ (test_bit( DF_CRASHED, &d->flags) ? DOMFLAGS_CRASHED : 0) |
+ (test_bit( DF_SHUTDOWN, &d->flags) ? DOMFLAGS_SHUTDOWN : 0) |
+ (test_bit(EDF_CTRLPAUSE, &ed->flags) ? DOMFLAGS_PAUSED : 0) |
+ (test_bit(EDF_BLOCKED, &ed->flags) ? DOMFLAGS_BLOCKED : 0) |
+ (test_bit(EDF_RUNNING, &ed->flags) ? DOMFLAGS_RUNNING : 0);
op->u.getdomaininfo.flags |= ed->processor << DOMFLAGS_CPUSHIFT;
op->u.getdomaininfo.flags |=
{
write_lock(&domlist_lock);
pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
- for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_list )
+ for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
if ( (*pd)->id > d->id )
break;
- d->next_list = *pd;
+ d->next_in_list = *pd;
*pd = d;
- d->next_hash = domain_hash[DOMAIN_HASH(dom_id)];
+ d->next_in_hashbucket = domain_hash[DOMAIN_HASH(dom_id)];
domain_hash[DOMAIN_HASH(dom_id)] = d;
write_unlock(&domlist_lock);
}
d = NULL;
break;
}
- d = d->next_hash;
+ d = d->next_in_hashbucket;
}
read_unlock(&domlist_lock);
struct exec_domain *ed;
domain_pause(d);
- if ( !test_and_set_bit(DF_DYING, &d->d_flags) )
+ if ( !test_and_set_bit(DF_DYING, &d->flags) )
{
for_each_exec_domain(d, ed)
sched_rem_domain(ed);
if ( d->id == 0 )
BUG();
- set_bit(DF_CRASHED, &d->d_flags);
+ set_bit(DF_CRASHED, &d->flags);
send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
}
if ( (d->shutdown_code = reason) == SHUTDOWN_crash )
- set_bit(DF_CRASHED, &d->d_flags);
+ set_bit(DF_CRASHED, &d->flags);
else
- set_bit(DF_SHUTDOWN, &d->d_flags);
+ set_bit(DF_SHUTDOWN, &d->flags);
send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
struct domain **pd;
atomic_t old, new;
- if ( !test_bit(DF_DYING, &d->d_flags) )
+ if ( !test_bit(DF_DYING, &d->flags) )
BUG();
/* May be already destructed, or get_domain() can race us. */
write_lock(&domlist_lock);
pd = &domain_list;
while ( *pd != d )
- pd = &(*pd)->next_list;
- *pd = d->next_list;
+ pd = &(*pd)->next_in_list;
+ *pd = d->next_in_list;
pd = &domain_hash[DOMAIN_HASH(d->id)];
while ( *pd != d )
- pd = &(*pd)->next_hash;
- *pd = d->next_hash;
+ pd = &(*pd)->next_in_hashbucket;
+ *pd = d->next_in_hashbucket;
write_unlock(&domlist_lock);
destroy_event_channels(d);
if ( (vcpu >= MAX_VIRT_CPUS) || ((ed = p->exec_domain[vcpu]) == NULL) )
return -EINVAL;
- if (test_bit(DF_CONSTRUCTED, &p->d_flags) &&
- !test_bit(EDF_CTRLPAUSE, &ed->ed_flags))
+ if (test_bit(DF_CONSTRUCTED, &p->flags) &&
+ !test_bit(EDF_CTRLPAUSE, &ed->flags))
return -EINVAL;
if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
if ( (rc = arch_set_info_guest(ed, c)) != 0 )
goto out;
- set_bit(DF_CONSTRUCTED, &p->d_flags);
+ set_bit(DF_CONSTRUCTED, &p->flags);
out:
xfree(c);
sched_add_domain(ed);
/* domain_unpause_by_systemcontroller */
- if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
+ if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->flags) )
domain_wake(ed);
xfree(c);
max = d->max_event_channel;
chn = d->event_channel;
- for ( port = ed->eid * EVENT_CHANNELS_SPREAD; port < max; port++ )
+ for ( port = ed->id * EVENT_CHANNELS_SPREAD; port < max; port++ )
if ( chn[port].state == ECS_FREE )
break;
for_each_domain ( d )
{
printk("Xen: DOM %u, flags=%lx refcnt=%d nr_pages=%d "
- "xenheap_pages=%d\n", d->id, d->d_flags,
+ "xenheap_pages=%d\n", d->id, d->flags,
atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
dump_pageframe_info(d);
printk("Guest: %p CPU %d [has=%c] flags=%lx "
"upcall_pend = %02x, upcall_mask = %02x\n", ed,
ed->processor,
- test_bit(EDF_RUNNING, &ed->ed_flags) ? 'T':'F',
- ed->ed_flags,
+ test_bit(EDF_RUNNING, &ed->flags) ? 'T':'F',
+ ed->flags,
ed->vcpu_info->evtchn_upcall_pending,
ed->vcpu_info->evtchn_upcall_mask);
- printk("Notifying guest... %d/%d\n", d->id, ed->eid);
+ printk("Notifying guest... %d/%d\n", d->id, ed->id);
printk("port %d/%d stat %d %d %d\n",
VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
test_bit(ed->virq_to_evtchn[VIRQ_DEBUG],
spin_lock(&d->page_alloc_lock);
- if ( unlikely(test_bit(DF_DYING, &d->d_flags)) ||
+ if ( unlikely(test_bit(DF_DYING, &d->flags)) ||
unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
{
DPRINTK("Over-allocation for domain %u: %u > %u\n",
d->id, d->tot_pages + (1 << order), d->max_pages);
DPRINTK("...or the domain is dying (%d)\n",
- !!test_bit(DF_DYING, &d->d_flags));
+ !!test_bit(DF_DYING, &d->flags));
spin_unlock(&d->page_alloc_lock);
free_heap_pages(MEMZONE_DOM, pg, order);
return NULL;
spin_unlock_recursive(&d->page_alloc_lock);
- if ( likely(!test_bit(DF_DYING, &d->d_flags)) )
+ if ( likely(!test_bit(DF_DYING, &d->flags)) )
{
free_heap_pages(MEMZONE_DOM, pg, order);
}
};
#define BVT_INFO(p) ((struct bvt_dom_info *)(p)->sched_priv)
-#define EBVT_INFO(p) ((struct bvt_edom_info *)(p)->ed_sched_priv)
+#define EBVT_INFO(p) ((struct bvt_edom_info *)(p)->sched_priv)
#define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
#define RUNLIST(p) ((struct list_head *)&(EBVT_INFO(p)->run_list))
#define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
return -1;
memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
}
- ed->ed_sched_priv = &BVT_INFO(d)->ed_inf[ed->eid];
- BVT_INFO(d)->ed_inf[ed->eid].inf = BVT_INFO(d);
- BVT_INFO(d)->ed_inf[ed->eid].exec_domain = ed;
+ ed->sched_priv = &BVT_INFO(d)->ed_inf[ed->id];
+ BVT_INFO(d)->ed_inf[ed->id].inf = BVT_INFO(d);
+ BVT_INFO(d)->ed_inf[ed->id].exec_domain = ed;
return 0;
}
ASSERT(inf != NULL);
ASSERT(d != NULL);
- if (d->eid == 0) {
+ if (d->id == 0) {
inf->mcu_advance = MCU_ADVANCE;
inf->domain = d->domain;
inf->warpback = 0;
}
}
-static int bvt_init_idle_task(struct exec_domain *p)
+static int bvt_init_idle_task(struct exec_domain *ed)
{
- if ( bvt_alloc_task(p) < 0 )
+ if ( bvt_alloc_task(ed) < 0 )
return -1;
- bvt_add_task(p);
- set_bit(EDF_RUNNING, &p->ed_flags);
- if ( !__task_on_runqueue(p) )
- __add_to_runqueue_head(p);
-
+ bvt_add_task(ed);
+
+ set_bit(EDF_RUNNING, &ed->flags);
+ if ( !__task_on_runqueue(ed) )
+ __add_to_runqueue_head(ed);
return 0;
}
-static void bvt_wake(struct exec_domain *d)
+static void bvt_wake(struct exec_domain *ed)
{
- struct bvt_edom_info *einf = EBVT_INFO(d);
+ struct bvt_edom_info *einf = EBVT_INFO(ed);
struct exec_domain *curr;
s_time_t now, r_time;
- int cpu = d->processor;
+ int cpu = ed->processor;
u32 curr_evt;
- if ( unlikely(__task_on_runqueue(d)) )
+ if ( unlikely(__task_on_runqueue(ed)) )
return;
- __add_to_runqueue_head(d);
+ __add_to_runqueue_head(ed);
now = NOW();
/* Set the BVT parameters. AVT should always be updated
if CPU migration occurred. */
if ( einf->avt < CPU_SVT(cpu) ||
- unlikely(test_bit(EDF_MIGRATED, &d->ed_flags)) )
+ unlikely(test_bit(EDF_MIGRATED, &ed->flags)) )
einf->avt = CPU_SVT(cpu);
/* Deal with warping here. */
- einf->evt = calc_evt(d, einf->avt);
+ einf->evt = calc_evt(ed, einf->avt);
curr = schedule_data[cpu].curr;
curr_evt = calc_evt(curr, calc_avt(curr, now));
}
-static void bvt_sleep(struct exec_domain *d)
+static void bvt_sleep(struct exec_domain *ed)
{
- if ( test_bit(EDF_RUNNING, &d->ed_flags) )
- cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
- else if ( __task_on_runqueue(d) )
- __del_from_runqueue(d);
+ if ( test_bit(EDF_RUNNING, &ed->flags) )
+ cpu_raise_softirq(ed->processor, SCHEDULE_SOFTIRQ);
+ else if ( __task_on_runqueue(ed) )
+ __del_from_runqueue(ed);
}
/**
struct bvt_edom_info *next_prime_einf = NULL;
struct task_slice ret;
- ASSERT(prev->ed_sched_priv != NULL);
+ ASSERT(prev->sched_priv != NULL);
ASSERT(prev_einf != NULL);
ASSERT(__task_on_runqueue(prev));
{
struct list_head *queue;
int loop = 0;
- struct bvt_edom_info *d_inf;
- struct exec_domain *d;
+ struct bvt_edom_info *ed_inf;
+ struct exec_domain *ed;
printk("svt=0x%08lX ", CPU_SVT(i));
printk("QUEUE rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
(unsigned long) queue->next, (unsigned long) queue->prev);
- list_for_each_entry ( d_inf, queue, run_list )
+ list_for_each_entry ( ed_inf, queue, run_list )
{
- d = d_inf->exec_domain;
- printk("%3d: %u has=%c ", loop++, d->domain->id,
- test_bit(EDF_RUNNING, &d->ed_flags) ? 'T':'F');
- bvt_dump_runq_el(d);
- printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
+ ed = ed_inf->exec_domain;
+ printk("%3d: %u has=%c ", loop++, ed->domain->id,
+ test_bit(EDF_RUNNING, &ed->flags) ? 'T':'F');
+ bvt_dump_runq_el(ed);
+ printk("c=0x%X%08X\n", (u32)(ed->cpu_time>>32), (u32)ed->cpu_time);
printk(" l: %p n: %p p: %p\n",
- &d_inf->run_list, d_inf->run_list.next, d_inf->run_list.prev);
+ &ed_inf->run_list, ed_inf->run_list.next, ed_inf->run_list.prev);
}
}
d->exec_domain[vcpu] = ed;
ed->domain = d;
- ed->eid = vcpu;
+ ed->id = vcpu;
if ( SCHED_OP(alloc_task, ed) < 0 )
goto out;
- if (vcpu != 0) {
- ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
+ if ( vcpu != 0 )
+ {
+ ed->vcpu_info = &d->shared_info->vcpu_data[ed->id];
- for_each_exec_domain(d, edc) {
- if (edc->ed_next_list == NULL || edc->ed_next_list->eid > vcpu)
+ for_each_exec_domain( d, edc )
+ {
+ if ( (edc->next_in_list == NULL) ||
+ (edc->next_in_list->id > vcpu) )
break;
}
- ed->ed_next_list = edc->ed_next_list;
- edc->ed_next_list = ed;
+ ed->next_in_list = edc->next_in_list;
+ edc->next_in_list = ed;
- if (test_bit(EDF_CPUPINNED, &edc->ed_flags)) {
+ if (test_bit(EDF_CPUPINNED, &edc->flags)) {
ed->processor = (edc->processor + 1) % smp_num_cpus;
- set_bit(EDF_CPUPINNED, &ed->ed_flags);
+ set_bit(EDF_CPUPINNED, &ed->flags);
} else {
ed->processor = (edc->processor + 1) % smp_num_cpus; /* XXX */
}
struct domain *d = ed->domain;
/* Must be unpaused by control software to start execution. */
- set_bit(EDF_CTRLPAUSE, &ed->ed_flags);
+ set_bit(EDF_CTRLPAUSE, &ed->flags);
if ( d->id != IDLE_DOMAIN_ID )
{
}
SCHED_OP(add_task, ed);
- TRACE_2D(TRC_SCHED_DOM_ADD, d->id, ed->eid);
+ TRACE_2D(TRC_SCHED_DOM_ADD, d->id, ed->id);
}
void sched_rem_domain(struct exec_domain *ed)
{
rem_ac_timer(&ed->timer);
SCHED_OP(rem_task, ed);
- TRACE_2D(TRC_SCHED_DOM_REM, ed->domain->id, ed->eid);
+ TRACE_2D(TRC_SCHED_DOM_REM, ed->domain->id, ed->id);
}
void init_idle_task(void)
SCHED_OP(sleep, ed);
spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
- TRACE_2D(TRC_SCHED_SLEEP, ed->domain->id, ed->eid);
+ TRACE_2D(TRC_SCHED_SLEEP, ed->domain->id, ed->id);
/* Synchronous. */
- while ( test_bit(EDF_RUNNING, &ed->ed_flags) && !domain_runnable(ed) )
+ while ( test_bit(EDF_RUNNING, &ed->flags) && !domain_runnable(ed) )
cpu_relax();
}
ed->wokenup = NOW();
#endif
}
- clear_bit(EDF_MIGRATED, &ed->ed_flags);
+ clear_bit(EDF_MIGRATED, &ed->flags);
spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
- TRACE_2D(TRC_SCHED_WAKE, ed->domain->id, ed->eid);
+ TRACE_2D(TRC_SCHED_WAKE, ed->domain->id, ed->id);
}
/* Block the currently-executing domain until a pertinent event occurs. */
struct exec_domain *ed = current;
ed->vcpu_info->evtchn_upcall_mask = 0;
- set_bit(EDF_BLOCKED, &ed->ed_flags);
+ set_bit(EDF_BLOCKED, &ed->flags);
/* Check for events /after/ blocking: avoids wakeup waiting race. */
if ( event_pending(ed) )
{
- clear_bit(EDF_BLOCKED, &ed->ed_flags);
+ clear_bit(EDF_BLOCKED, &ed->flags);
}
else
{
- TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->eid);
+ TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->id);
__enter_scheduler();
}
/* Voluntarily yield the processor for this allocation. */
static long do_yield(void)
{
- TRACE_2D(TRC_SCHED_YIELD, current->domain->id, current->eid);
+ TRACE_2D(TRC_SCHED_YIELD, current->domain->id, current->id);
__enter_scheduler();
return 0;
}
case SCHEDOP_shutdown:
{
- TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->id, current->eid,
+ TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->id, current->id,
(op >> SCHEDOP_reasonshift));
domain_shutdown((u8)(op >> SCHEDOP_reasonshift));
break;
add_ac_timer(&schedule_data[cpu].s_timer);
/* Must be protected by the schedule_lock! */
- set_bit(EDF_RUNNING, &next->ed_flags);
+ set_bit(EDF_RUNNING, &next->flags);
spin_unlock_irq(&schedule_data[cpu].schedule_lock);
}
TRACE_4D(TRC_SCHED_SWITCH,
- prev->domain->id, prev->eid,
- next->domain->id, next->eid);
+ prev->domain->id, prev->id,
+ next->domain->id, next->id);
context_switch(prev, next);
}
extern unsigned long xenheap_phys_end; /* user-configurable */
#endif
-#define GDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + ((ed)->eid << PDPT_VCPU_VA_SHIFT))
+#define GDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + ((ed)->id << PDPT_VCPU_VA_SHIFT))
#define GDT_VIRT_END(ed) (GDT_VIRT_START(ed) + (64*1024))
-#define LDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + (64*1024) + ((ed)->eid << PDPT_VCPU_VA_SHIFT))
+#define LDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + (64*1024) + ((ed)->id << PDPT_VCPU_VA_SHIFT))
#define LDT_VIRT_END(ed) (LDT_VIRT_START(ed) + (64*1024))
#define PDPT_VCPU_SHIFT 5
{
case TRAP_int3:
case TRAP_debug:
- set_bit(EDF_CTRLPAUSE, &ed->ed_flags);
+ set_bit(EDF_CTRLPAUSE, &ed->flags);
raise_softirq(SCHEDULE_SOFTIRQ);
return 1;
}
extern void restore_fpu(struct exec_domain *tsk);
#define unlazy_fpu(_tsk) do { \
- if ( test_bit(EDF_USEDFPU, &(_tsk)->ed_flags) ) \
+ if ( test_bit(EDF_USEDFPU, &(_tsk)->flags) ) \
save_init_fpu(_tsk); \
} while ( 0 )
* NB2. We save EDF_RUNNING across the unblock to avoid a needless
* IPI for domains that we IPI'd to unblock.
*/
- running = test_bit(EDF_RUNNING, &ed->ed_flags);
+ running = test_bit(EDF_RUNNING, &ed->flags);
exec_domain_unblock(ed);
if ( running )
smp_send_event_check_cpu(ed->processor);
struct exec_domain
{
- u32 processor;
- vcpu_info_t *vcpu_info;
- struct domain *domain;
- struct exec_domain *ed_next_list;
- int eid;
+ int id;
+ int processor;
+ vcpu_info_t *vcpu_info;
+
+ struct domain *domain;
+ struct exec_domain *next_in_list;
struct ac_timer timer; /* one-shot timer for timeout values */
unsigned long sleep_tick; /* tick at which this vcpu started sleep */
s_time_t lastdeschd; /* time this domain was last descheduled */
s_time_t cpu_time; /* total CPU time received till now */
s_time_t wokenup; /* time domain got woken up */
- void *ed_sched_priv; /* scheduler-specific data */
+ void *sched_priv; /* scheduler-specific data */
- unsigned long ed_flags;
+ unsigned long flags;
- u16 virq_to_evtchn[NR_VIRQS];
+ u16 virq_to_evtchn[NR_VIRQS];
- atomic_t pausecnt;
+ atomic_t pausecnt;
struct arch_exec_domain arch;
};
int shutdown_code; /* code value from OS (if DF_SHUTDOWN). */
void *sched_priv; /* scheduler-specific data */
- struct domain *next_list, *next_hash;
+ struct domain *next_in_list;
+ struct domain *next_in_hashbucket;
/* Event channel information. */
event_channel_t *event_channel;
unsigned int max_event_channel;
spinlock_t event_channel_lock;
- grant_table_t *grant_table;
+ grant_table_t *grant_table;
/*
* Interrupt to event-channel mappings. Updates should be protected by the
* lock, but races don't usually matter.
*/
#define NR_PIRQS 128 /* Put this somewhere sane! */
- u16 pirq_to_evtchn[NR_PIRQS];
- u32 pirq_mask[NR_PIRQS/32];
+ u16 pirq_to_evtchn[NR_PIRQS];
+ u32 pirq_mask[NR_PIRQS/32];
- unsigned long d_flags;
- unsigned long vm_assist;
+ unsigned long flags;
+ unsigned long vm_assist;
- atomic_t refcnt;
+ atomic_t refcnt;
struct exec_domain *exec_domain[MAX_VIRT_CPUS];
extern struct exec_domain *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID (0x7FFFU)
-#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->d_flags))
+#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->flags))
struct exec_domain *alloc_exec_domain_struct(struct domain *d,
unsigned long vcpu);
extern struct domain *domain_list;
#define for_each_domain(_d) \
- for ( (_d) = domain_list; (_d) != NULL; (_d) = (_d)->next_list )
+ for ( (_d) = domain_list; (_d) != NULL; (_d) = (_d)->next_in_list )
#define for_each_exec_domain(_d,_ed) \
for ( (_ed) = (_d)->exec_domain[0]; \
(_ed) != NULL; \
- (_ed) = (_ed)->ed_next_list )
+ (_ed) = (_ed)->next_in_list )
#define EDF_DONEFPUINIT 0 /* Has the FPU been initialised for this task? */
#define EDF_USEDFPU 1 /* Has this task used the FPU since last save? */
#define EDF_GUEST_STTS 2 /* Has the guest OS requested 'stts'? */
-#define DF_CONSTRUCTED 3 /* Has the guest OS been fully built yet? */
-#define DF_IDLETASK 4 /* Is this one of the per-CPU idle domains? */
-#define DF_PRIVILEGED 5 /* Is this domain privileged? */
-#define DF_PHYSDEV 6 /* May this domain do IO to physical devices? */
-#define EDF_BLOCKED 7 /* Domain is blocked waiting for an event. */
-#define EDF_CTRLPAUSE 8 /* Domain is paused by controller software. */
-#define DF_SHUTDOWN 9 /* Guest shut itself down for some reason. */
-#define DF_CRASHED 10 /* Domain crashed inside Xen, cannot continue. */
-#define DF_DYING 11 /* Death rattle. */
-#define EDF_RUNNING 12 /* Currently running on a CPU. */
-#define EDF_CPUPINNED 13 /* Disables auto-migration. */
-#define EDF_MIGRATED 14 /* Domain migrated between CPUs. */
-#define EDF_DONEINIT 15 /* Initialization completed . */
-
-static inline int domain_runnable(struct exec_domain *d)
+#define EDF_BLOCKED 3 /* Domain is blocked waiting for an event. */
+#define EDF_CTRLPAUSE 4 /* Domain is paused by controller software. */
+#define EDF_RUNNING 5 /* Currently running on a CPU. */
+#define EDF_CPUPINNED 6 /* Disables auto-migration. */
+#define EDF_MIGRATED 7 /* Domain migrated between CPUs. */
+#define EDF_DONEINIT 8 /* Initialization completed. */
+
+#define DF_CONSTRUCTED 0 /* Has the guest OS been fully built yet? */
+#define DF_IDLETASK 1 /* Is this one of the per-CPU idle domains? */
+#define DF_PRIVILEGED 2 /* Is this domain privileged? */
+#define DF_PHYSDEV 3 /* May this domain do IO to physical devices? */
+#define DF_SHUTDOWN 4 /* Guest shut itself down for some reason. */
+#define DF_CRASHED 5 /* Domain crashed inside Xen, cannot continue. */
+#define DF_DYING 6 /* Death rattle. */
+
+static inline int domain_runnable(struct exec_domain *ed)
{
- return ( (atomic_read(&d->pausecnt) == 0) &&
- !(d->ed_flags & ((1<<EDF_BLOCKED)|(1<<EDF_CTRLPAUSE))) &&
- !(d->domain->d_flags & ((1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
+ return ( (atomic_read(&ed->pausecnt) == 0) &&
+ !(ed->flags & ((1<<EDF_BLOCKED)|(1<<EDF_CTRLPAUSE))) &&
+ !(ed->domain->flags & ((1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
}
static inline void exec_domain_pause(struct exec_domain *ed)
static inline void exec_domain_unblock(struct exec_domain *ed)
{
- if ( test_and_clear_bit(EDF_BLOCKED, &ed->ed_flags) )
+ if ( test_and_clear_bit(EDF_BLOCKED, &ed->flags) )
domain_wake(ed);
}
for_each_exec_domain ( d, ed )
{
ASSERT(ed != current);
- if ( !test_and_set_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
+ if ( !test_and_set_bit(EDF_CTRLPAUSE, &ed->flags) )
domain_sleep(ed);
}
for_each_exec_domain ( d, ed )
{
- if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
+ if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->flags) )
domain_wake(ed);
}
}
-#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->d_flags))
-#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->d_flags))
+#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->flags))
+#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
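
Note: after the renumbering, an EDF_* bit and a DF_* bit may share the
same numeric value (e.g. EDF_BLOCKED and DF_PHYSDEV are both 3); this is
harmless because each set indexes a different 'flags' word. A minimal
standalone sketch illustrating this, using mock structures rather than
the real scheduler definitions:

#include <stdio.h>

/* Bit values as defined in this patch. */
#define EDF_BLOCKED 3 /* bit in exec_domain.flags */
#define DF_PHYSDEV  3 /* bit in domain.flags */

/* Mock structures for illustration only. */
struct domain      { unsigned long flags; };
struct exec_domain { unsigned long flags; struct domain *domain; };

int main(void)
{
    struct domain d = { .flags = 1UL << DF_PHYSDEV };
    struct exec_domain ed = { .flags = 0, .domain = &d };

    /* Same bit number, two independent words: setting DF_PHYSDEV in
     * d.flags leaves EDF_BLOCKED clear in ed.flags. */
    printf("EDF_BLOCKED set? %d\n", !!(ed.flags & (1UL << EDF_BLOCKED)));
    printf("DF_PHYSDEV set?  %d\n", !!(d.flags & (1UL << DF_PHYSDEV)));
    return 0;
}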